MeUtils 2025.8.29.20.5.48-py3-none-any.whl → 2025.9.5.20.57.37-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. examples/_openaisdk/openai_router.py +14 -4
  2. examples/_openaisdk/openai_volc_doubao.py +139 -0
  3. meutils/apis/chatglm/glm_video_api.py +1 -1
  4. meutils/apis/google/chat.py +6 -2
  5. meutils/apis/google/images.py +25 -19
  6. meutils/apis/images/generations.py +7 -6
  7. meutils/{db/id2redis.py → apis/meituan/__init__.py} +2 -3
  8. meutils/apis/meituan/chat.py +109 -0
  9. meutils/apis/minimax/videos.py +11 -7
  10. meutils/apis/oneapi/tasks.py +3 -1
  11. meutils/apis/ppio/videos.py +1 -1
  12. meutils/clis/server.py +1 -3
  13. meutils/data/VERSION +1 -1
  14. meutils/io/_openai_files.py +1 -0
  15. meutils/io/files_utils.py +17 -2
  16. meutils/llm/check_utils.py +35 -4
  17. meutils/llm/openai_polling/chat.py +66 -15
  18. meutils/llm/openai_utils/adapters.py +45 -30
  19. meutils/schemas/oneapi/common.py +7 -0
  20. meutils/serving/fastapi/gunicorn.conf.py +4 -4
  21. meutils/str_utils/__init__.py +1 -1
  22. meutils/str_utils/regular_expression.py +17 -2
  23. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/METADATA +262 -262
  24. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/RECORD +28 -27
  25. examples/_openaisdk/openai_doubao.py +0 -67
  26. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/WHEEL +0 -0
  27. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/entry_points.txt +0 -0
  28. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/licenses/LICENSE +0 -0
  29. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/top_level.txt +0 -0
examples/_openaisdk/openai_router.py CHANGED
@@ -12,6 +12,7 @@ from meutils.pipe import *
  from openai import OpenAI
  from os import getenv
  from meutils.io.files_utils import to_url
+ from meutils.str_utils import parse_base64
 
  # gets API Key from environment variable OPENAI_API_KEY
  client = OpenAI(
@@ -23,8 +24,10 @@ client = OpenAI(
  #
  # api_key="sk-Azgp1thTIonR7IdIEqlJU51tpDYNIYYpxHvAZwFeJiOdVWiz"
 
- base_url="https://api.huandutech.com/v1",
- api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
+ # base_url="https://api.huandutech.com/v1",
+ # api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
+ api_key="sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M",
+ base_url="https://new.yunai.link/v1"
  )
 
  completion = client.chat.completions.create(
@@ -46,7 +49,7 @@ completion = client.chat.completions.create(
  "content": [
  {
  "type": "text",
- "text": "带个墨镜"
+ "text": "旁边,画条狗,带个墨镜"
  },
  {
  "type": "image_url",
@@ -58,10 +61,17 @@
  }
  ]
  )
- # print(completion.choices[0].message.content)
+ print(completion.choices[0].message.content)
  # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))
 
 
+
+ b64_list = parse_base64(completion.choices[0].message.content)
+
+ arun(to_url(b64_list, content_type="image/png"))
+
+
+ # '好的,旁边加一只戴墨镜的狗。\n\n![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ'
  # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))
 
  # print(dict(completion.choices[0].message).keys())
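
Note on the new parse_base64 helper used above: its implementation lives in meutils/str_utils (touched later in this diff) and is not shown here. Judging from how it is called, both in this example and in meutils/apis/google/images.py below, it presumably pulls the base64 data-URI images that the model embeds in message.content back out as a list so they can be re-uploaded with to_url. A minimal sketch under that assumption, not the actual implementation:

# Hypothetical sketch only; the real parse_base64 is defined in meutils.str_utils
# (see meutils/str_utils/regular_expression.py in the file list) and may differ.
import re
from typing import List

def parse_base64(text: str) -> List[str]:
    """Extract markdown-embedded data-URI images like ![image](data:image/png;base64,...)."""
    return re.findall(r'!\[.*?\]\((data:image/.*?)\)', text)

# parse_base64('![image](data:image/png;base64,iVBORw0KGgo...)')
# -> ['data:image/png;base64,iVBORw0KGgo...']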
examples/_openaisdk/openai_volc_doubao.py ADDED
@@ -0,0 +1,139 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : openai_siliconflow
+ # @Time : 2024/6/26 10:42
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description : todo 区分能thinking的模型
+ import os
+
+ from meutils.pipe import *
+ from openai import OpenAI
+ from openai import OpenAI, APIStatusError
+
+ # 404 403 429
+ client = OpenAI(
+
+ base_url=os.getenv("VOLC_BASE_URL"), # /chat/completions
+ api_key=os.getenv("VOLC_API_KEY")
+
+ # api_key=os.getenv("OPENAI_API_KEY") +'-3587'
+
+ )
+ models = """
+ doubao-1.5-vision-pro-250328
+ doubao-1-5-vision-pro-32k-250115
+ doubao-1-5-ui-tars-250428
+ doubao-1-5-pro-32k-250115
+ doubao-1-5-pro-256k-250115
+ doubao-1-5-pro-32k-character-250715
+ doubao-1-5-pro-32k-character-250228
+ doubao-1-5-thinking-pro-250415
+ doubao-1-5-thinking-pro-m-250428
+ doubao-1-5-thinking-vision-pro-250428
+ """.split()
+
+ models = """
+ doubao-seed-1.6-250615
+ doubao-seed-1-6-250615
+ doubao-seed-1-6-vision-250815
+
+ doubao-seed-1-6-flash-250715
+ doubao-seed-1-6-flash-250615
+
+ doubao-seed-1-6-thinking-250615
+ doubao-seed-1-6-thinking-250715
+ """.split()
+
+ models = ['doubao-seed-1-6-vision-250815']
+
+ models = ["doubao-1-5-pro-32k-250115"]
+ models = {
+ "doubao-1-5-pro-32k-250115",
+ "doubao-1-5-pro-256k-250115",
+ "doubao-1-5-pro-32k-character-250715",
+ "doubao-1-5-pro-32k-character-250228",
+ "doubao-1.5-vision-pro-250328",
+ "doubao-1-5-vision-pro-32k-250115",
+ "doubao-1-5-thinking-pro-250415",
+ "doubao-1-5-thinking-pro-m-250428",
+ "doubao-1-5-thinking-vision-pro-250428",
+ "doubao-1-5-ui-tars-250428",
+ }
+
+ models = {
+ "doubao-1-5-thinking-pro-250415",
+ "doubao-1-5-thinking-pro-m-250428",
+ "doubao-1-5-thinking-vision-pro-250428",
+ "doubao-1-5-ui-tars-250428",
+ }
+
+ models = {
+ "doubao-seed-1.6-250615",
+ "doubao-seed-1-6-250615",
+ "doubao-seed-1-6-vision-250815",
+ "doubao-seed-1-6-thinking-250615",
+ "doubao-seed-1-6-thinking-250715"
+ }
+
+
+ def run(model="deepseek-r1-250528", thinking="disabled"):
+ try:
+ completion = client.chat.completions.create(
+ # model="ep-20241225184145-7nf5n",
+ model=model,
+ # model="doubao-1-5-pro-32k-250115",
+ # model="doubao-1-5-thinking-vision-pro-250428",
+
+ # model="doubao-lite-32k-character",
+ # model="doubao-pro-32k-character",
+
+ # model="doubao-pro-32k-search",
+
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "总结"
+ },
+ # {
+ # "type": "image_url",
+ # "image_url": {
+ # "url": "https://ark-project.tos-cn-beijing.ivolces.com/images/view.jpeg"
+ # }
+ # }
+ ]
+ }
+ ],
+ # top_p=0.7,
+ top_p=None,
+ temperature=None,
+ stream=False,
+ stream_options={"include_usage": True},
+ max_tokens=5,
+
+ # extra_body={
+ # "thinking": {
+ # "type": thinking
+ # }
+ # }
+ )
+ print(completion)
+ except APIStatusError as e:
+ print(e.status_code)
+
+ print(e.response)
+ print(e.message)
+ print(e.code)
+ logger.debug(f'{model} {thinking}')
+
+
+ if __name__ == '__main__':
+ for model in models:
+ # run(model)
+ run(model, thinking="enabled")
+ # run(model, thinking="auto")
meutils/apis/chatglm/glm_video_api.py CHANGED
@@ -109,7 +109,7 @@ async def generate(request: ImageRequest, n: int = 30): # 兼容dalle3
 
  # VideoResult
  if __name__ == '__main__':
- api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
+ api_key = "03bdb799cbcb4a8cac609f9d5ebe02e7.snp1yga8VEEzO2bk"
 
  # api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
  # api_key = "7d10426c06afa81e8d7401d97781249c.DbqlSsicRtaUdKXI" # 新号
meutils/apis/google/chat.py CHANGED
@@ -9,7 +9,7 @@
  # @Description : https://ai.google.dev/gemini-api/docs/openai?hl=zh-cn
  # genai => openai
  # https://googleapis.github.io/python-genai/genai.html#module-genai.models
-
+ import shortuuid
 
  from meutils.pipe import *
  from meutils.decorators.retry import retrying
@@ -233,7 +233,7 @@ class Completions(object):
  _response = await edit_image(_)
  url = dict(_response.data[0])["url"]
  else:
- url = await to_url(part.inline_data.data, mime_type=part.inline_data.mime_type)
+ url = await to_url(part.inline_data.data, filename=f'{shortuuid.random()}.png', mime_type=part.inline_data.mime_type)
 
  image_response.data.append({"url": url, "revised_prompt": part.text})
 
@@ -284,6 +284,7 @@ class Completions(object):
  if part.inline_data:
  image_url = await to_url(
  part.inline_data.data,
+ filename=f'{shortuuid.random()}.png',
  mime_type=part.inline_data.mime_type
  )
  yield f"![image_url]({image_url})"
@@ -511,6 +512,9 @@ if __name__ == '__main__':
  base_url = "https://api.huandutech.com"
  api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
 
+ base_url = "https://new.yunai.link"
+ api_key = "sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M"
+
  # arun(Completions(base_url=base_url, api_key=api_key).create_for_images(request))
  # arun(Completions(base_url=base_url, api_key=api_key).generate(request))
 
meutils/apis/google/images.py CHANGED
@@ -20,7 +20,8 @@
  """
 
  from meutils.pipe import *
- from meutils.io.files_utils import to_url
+ from meutils.io.files_utils import to_url, to_base64
+ from meutils.str_utils import parse_base64
  from meutils.llm.clients import AsyncOpenAI
  from meutils.apis.images.edits import edit_image, ImageProcess
 
@@ -28,14 +29,18 @@ from meutils.schemas.image_types import ImageRequest, ImagesResponse
  from meutils.schemas.openai_types import CompletionRequest
 
 
- async def generate(request: ImageRequest, api_key: Optional[str] = None, base_url: Optional[str] = None):
+ async def openai_generate(request: ImageRequest, api_key: Optional[str] = None, base_url: Optional[str] = None):
+ api_key = api_key or os.getenv("OPENROUTER_API_KEY")
+
  is_hd = False
  if request.model.endswith("-hd"):
  is_hd = True
  request.model = request.model.removesuffix("-hd")
 
  image_urls = request.image_urls
- image_urls = await to_url(image_urls, filename='.png', content_type="image/png")
+ # image_urls = await to_url(image_urls, filename='.png', content_type="image/png")
+ # image_urls = await to_base64(image_urls, content_type="image/png")
+
  image_urls = [
  {
  "type": "image_url",
@@ -77,14 +82,12 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None, base_ur
 
  completion = await client.chat.completions.create(**data)
  # logger.debug(completion)
- if (
- completion
- and completion.choices
- and hasattr(completion.choices[0].message, "images")
- and (images := completion.choices[0].message.images)
- ):
- image_urls = [image['image_url']['url'] for image in images]
- # logger.debug(image_urls)
+ if completion and completion.choices and (revised_prompt := completion.choices[0].message.content):
+ if (hasattr(completion.choices[0].message, "images") and (images := completion.choices[0].message.images)):
+
+ image_urls = [image['image_url']['url'] for image in images]
+ else:
+ image_urls = parse_base64(revised_prompt)
 
  if is_hd:
  # logger.debug(image_urls)
@@ -95,7 +98,7 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None, base_ur
  response = ImagesResponse(image=image_urls)
 
  else:
- image_urls = await to_url(image_urls, filename=f"{shortuuid.random()}.png", content_type="image/png")
+ image_urls = await to_url(image_urls, filename=f'{shortuuid.random()}.png', content_type="image/png")
  response = ImagesResponse(image=image_urls)
 
  # logger.debug(response)
@@ -103,25 +106,28 @@ async def generate(request: ImageRequest, api_key: Optional[str] = None, base_ur
  if response.data:
  return response
 
- raise Exception(f"image generate failed: {completion}")
+ raise Exception(f"Image generate failed: {completion}")
 
 
  if __name__ == '__main__':
- base_url = "https://all.chatfire.cn/openrouter/v1"
- api_key = os.getenv("OPENROUTER_API_KEY")
+ # base_url = "https://all.chatfire.cn/openrouter/v1"
+ # api_key = os.getenv("OPENROUTER_API_KEY")
+
+ api_key = "sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M"
+ base_url = "https://new.yunai.link/v1"
 
  request = ImageRequest(
  # model="google/gemini-2.5-flash-image-preview:free",
- model="google/gemini-2.5-flash-image-preview:free-hd",
+ # model="google/gemini-2.5-flash-image-preview:free-hd",
 
- # model="gemini-2.5-flash-image-preview",
+ model="gemini-2.5-flash-image-preview",
 
  prompt="带个墨镜",
- # image=["https://oss.ffire.cc/files/kling_watermark.png"],
+ image=["https://oss.ffire.cc/files/kling_watermark.png"],
  )
 
  r = arun(
- generate(
+ openai_generate(
  request, base_url=base_url, api_key=api_key
  )
  )
meutils/apis/images/generations.py CHANGED
@@ -25,7 +25,7 @@ from meutils.apis.jimeng.images import generate as jimeng_generate
 
  from meutils.apis.qwen.chat import Completions as QwenCompletions
  from meutils.apis.google.chat import Completions as GoogleCompletions
- from meutils.apis.google.images import generate as google_generate
+ from meutils.apis.google.images import openai_generate
 
 
  async def generate(
@@ -59,11 +59,12 @@ async def generate(
  request.image = request.image[-1]
  return await QwenCompletions(api_key=api_key).generate(request)
 
- if request.model.startswith(("gemini",)):
- return await GoogleCompletions(base_url=base_url, api_key=api_key).generate(request)
-
- if request.model.startswith(("google/gemini",)): # openrouter
- return await google_generate(request, base_url=base_url, api_key=api_key)
+ if request.model.startswith(("google/gemini", "gemini")): # openrouter
+ if api_key.endswith("-openai"):
+ api_key = api_key.removesuffix("-openai")
+ return await openai_generate(request, base_url=base_url, api_key=api_key)
+ else:
+ return await GoogleCompletions(base_url=base_url, api_key=api_key).generate(request) # 原生接口
 
  # 其他
  data = {
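
The rewritten gemini branch above folds the two previous checks into one and introduces an api_key suffix convention: a key ending in "-openai" routes the request through openai_generate (the OpenAI-compatible path in meutils/apis/google/images.py), with the suffix stripped first; any other key keeps using the native GoogleCompletions client. A hypothetical call illustrating the convention (the key and base_url below are placeholders, not values from the package):

# Hypothetical usage sketch of the "-openai" suffix routing above.
from meutils.schemas.image_types import ImageRequest

request = ImageRequest(model="gemini-2.5-flash-image-preview", prompt="带个墨镜")

# await generate(request, api_key="sk-xxx-openai", base_url="https://example.com/v1")
#   -> openai_generate(request, api_key="sk-xxx", ...)
# await generate(request, api_key="sk-xxx", base_url=None)
#   -> GoogleCompletions(...).generate(request)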
meutils/{db/id2redis.py → apis/meituan/__init__.py} RENAMED
@@ -1,12 +1,11 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*-
  # @Project : AI. @by PyCharm
- # @File : id2redis
- # @Time : 2025/8/26 17:21
+ # @File : __init__.py
+ # @Time : 2025/9/3 14:40
  # @Author : betterme
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description :
 
  from meutils.pipe import *
-
meutils/apis/meituan/chat.py ADDED
@@ -0,0 +1,109 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : chat
+ # @Time : 2025/9/3 14:41
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description : todo
+
+
+ from openai import AsyncOpenAI
+
+ from meutils.pipe import *
+ from meutils.decorators.retry import retrying
+ from meutils.io.files_utils import to_bytes, guess_mime_type
+ from meutils.caches import rcache
+
+ from meutils.llm.openai_utils import to_openai_params
+
+ from meutils.config_utils.lark_utils import get_next_token_for_polling
+ from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage, \
+ ChatCompletion
+
+ base_url = "https://longcat.chat/api/v1/chat-completion"
+ base_url = "https://longcat.chat/api/v1/chat-completion-oversea"
+ base_url = "https://longcat.chat/api/v1"
+
+
+ class Completions(object):
+ def __init__(self, api_key: Optional[str] = None):
+ self.api_key = api_key
+
+ async def create(self, request: CompletionRequest, **kwargs):
+ payload = self.requset2payload(request)
+ payload['conversationId'] = await self.create_chat()
+
+ logger.debug(payload)
+
+ headers = {
+ 'Cookie': self.api_key
+ }
+
+ async with httpx.AsyncClient(base_url=base_url, headers=headers, timeout=100) as client:
+ async with client.stream("POST", "/chat-completion", json=payload) as response:
+ logger.debug(response.status_code)
+ response.raise_for_status()
+
+ async for chunk in response.aiter_lines():
+ logger.debug(chunk)
+
+ def requset2payload(self, request: CompletionRequest):
+ payload = {
+ "content": request.last_user_content, # todo: 多轮
+ "messages": [
+ # {
+ # "role": "user",
+ # "content": "hi",
+ # "chatStatus": "FINISHED",
+ # "messageId": 11263291,
+ # "idType": "custom"
+ # },
+ # {
+ # "role": "assistant",
+ # "content": "",
+ # "chatStatus": "LOADING",
+ # "messageId": 92193819,
+ # "idType": "custom"
+ # }
+ ],
+ "reasonEnabled": 0,
+ "searchEnabled": 0,
+ "regenerate": 0
+ }
+
+ return payload
+
+ async def create_chat(self):
+ headers = {
+ 'Cookie': self.api_key
+ }
+ payload = {
+ "model": "",
+ "agentId": ""
+ }
+ async with httpx.AsyncClient(base_url=base_url, headers=headers, timeout=100) as client:
+ response = await client.post("/session-create", json=payload)
+ response.raise_for_status()
+ # {'code': 0,
+ # 'data': {'agent': '1',
+ # 'conversationId': 'c1731258-230a-4b2e-b7ef-ea5e83c38e0e',
+ # 'createAt': 1756883097539,
+ # 'currentMessageId': 0,
+ # 'label': '今天',
+ # 'model': 'LongCat',
+ # 'title': '新对话',
+ # 'titleType': 'SYSTEM',
+ # 'updateAt': 1756883097539},
+ # 'message': 'success'}
+ return response.json()['data']['conversationId']
+
+
+ if __name__ == '__main__':
+ cookie = "_lxsdk_cuid=1990e1e8790c8-0b2a66e23040a48-16525636-1fa400-1990e1e8790c8; passport_token_key=AgEGIygg22VuoMYTPonur9FA_-EVg9UXLu3LYOzJ4kIHSjQZeSNhwpytTU_cZFP6V1Juhk0CHMrAgwAAAABYLAAA9vXtnciaZBu2V99EMRJYRHTDSraV_OPLemUuVpi2WLsaa6RqC0PAKAOm6W_hIpbV"
+ request = CompletionRequest(
+ messages=[{'role': 'user', 'content': '你好'}]
+ )
+ arun(Completions(api_key=cookie).create(request))
+ # arun(Completions(api_key=cookie).create_chat())
meutils/apis/minimax/videos.py CHANGED
@@ -101,14 +101,18 @@ if __name__ == '__main__':
  "duration": 6,
  "resolution": "1080P"
  } # 299392563388531
- arun(
- create_task(data)
- )
 
- arun(
- get_task('299392563388531')
- )
+
+ token = """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJhYXZ5IHJ5ZGgiLCJVc2VyTmFtZSI6ImFhdnkgcnlkaCIsIkFjY291bnQiOiIiLCJTdWJqZWN0SUQiOiIxOTI0MzY5NjUwMjE3MzkwNDMyIiwiUGhvbmUiOiIiLCJHcm91cElEIjoiMTkyNDM2OTY1MDIwOTAwMTgyNCIsIlBhZ2VOYW1lIjoiIiwiTWFpbCI6IjJ4MDhobnJiQHl5dS5oZGVybm0uY29tIiwiQ3JlYXRlVGltZSI6IjIwMjUtMDUtMTkgMTY6MzY6MzMiLCJUb2tlblR5cGUiOjEsImlzcyI6Im1pbmltYXgifQ.ZQ_cSiErTQNHT37w8Ie2nIy4zLfmo0sBXbuIQd0uEU_HDyLn4WBrJ6O-CAnWldtxi9PY53YHZW6zTb33S9zDx4VcXVRO3Nxl5o2WQNYRj3KxxNtHWAGwtA-cCplmgY71m-Xe4kZtN-K25tgXVcWbdze4nev_OGdkDPHBxfhiP462P0wgQ_tqkl5BgTxgUhcskYNs6JogNQUP4c1LFoSR6vYoAYek95K199ehpBuE1jkLFa2JDzNlKlVq_e2LPZkwA7qW67Ih0yONFDEtvM5GXr9ZMjyFIhww4hIeYPGTpqwHnjY00GUzlh4F9e_gpsx-FLqxZn0Xfnhyz8YvUDidfQ"""
 
  arun(
- get_file('299393334087796')
+ create_task(data, api_key=token)
  )
+
+ # arun(
+ # get_task('299392563388531')
+ # )
+
+ # arun(
+ # get_file('299393334087796')
+ # )
meutils/apis/oneapi/tasks.py CHANGED
@@ -29,6 +29,7 @@ ACTIONS = {
  "cogvideox": "https://api.chatfire.cn/zhipuai/v1/async-result/{task_id}",
 
  "minimax": "https://api.chatfire.cn/minimax/v2/async/minimax-hailuo-02",
+ # :"https://api.chatfire.cn/minimax/v1/query/video_generation",
 
  "wan": "https://api.chatfire.cn/sf/v1/videos/generations", # wan-ai-wan2.1-t2v-14b 可能还有其他平台
 
@@ -80,7 +81,7 @@ async def polling_tasks(platform: str = "flux", action: str = "", status: str =
  if items := response['data']['items']:
  tasks = []
  model = ''
- for item in items[:128]: # 批量更新
+ for item in items[:64]: # 批量更新
  task_id = item['task_id']
  action = item['action'].split('-', maxsplit=1)[0] # 模糊匹配
  if 'fal-' in item['action']:
@@ -151,5 +152,6 @@ if __name__ == '__main__':
  # arun(get_tasks(action="jimeng-video-3.0", status="FAILURE"))
  # arun(get_tasks(action="jimeng-video-3.0", return_ids=True))
 
+ arun(get_tasks(action="jimeng-video-3.0", return_ids=True))
 
  # arun(refund_tasks())
meutils/apis/ppio/videos.py CHANGED
@@ -33,7 +33,7 @@ async def get_valid_token(min_points=18000):
  async def create_task(request: VideoRequest, api_key: Optional[str] = None):
  api_key = api_key or await get_valid_token()
 
- api_key="sk_4Ja29OIUBVwKo5GWx-PRTsRcTyxxRjZDpYxSdPg75QU"
+ # api_key="sk_4Ja29OIUBVwKo5GWx-PRTsRcTyxxRjZDpYxSdPg75QU"
 
  path = "/async/minimax-hailuo-02"
 
meutils/clis/server.py CHANGED
@@ -41,7 +41,7 @@ def gunicorn_run(
  preload: bool = False,
  pythonpath: str = 'python',
  gunicorn_conf: Optional[str] = None,
- max_requests: int = 1000,
+ max_requests: int = 1024 * 2,
  timeout: int = 300
  ):
  """
@@ -70,8 +70,6 @@
  os.system(cmd)
 
 
-
-
  @cli.command()
  def uvicorn_run(
  app: str,
meutils/data/VERSION CHANGED
@@ -1 +1 @@
- 2025.08.29.20.05.48
+ 2025.09.05.20.57.37
meutils/io/_openai_files.py CHANGED
@@ -23,6 +23,7 @@ async def file_extract(file): # "https://oss.ffire.cc/files/招标文件备案
  # file=file,
  # file=("filename.pdf", file),
  file=(filename, file, mime_type),
+
  purpose="file-extract"
  )
  logger.debug(file_object)
meutils/io/files_utils.py CHANGED
@@ -211,7 +211,13 @@ async def to_file(file: Union[UploadFile, str, bytes], filename: Optional[str] =
  return Path(filename).resolve()
 
 
- async def markdown_base64_to_url(text, pattern=r'!\[Image_\d+\]\((.+?)\)'):
+ async def markdown_base64_to_url(text, pattern=r'!\[.*?\]\((.*?)\)'):
+ """
+ :param text:
+ :param pattern:
+ pattern=r'!\[.*?\]\((data:image/.*?)\)'
+ :return:
+ """
  base64_strings = re.findall(pattern, text)
 
  # logger.debug(text)
@@ -335,4 +341,13 @@ if __name__ == '__main__':
  # # arun(get_file_duration(content=content))
  # arun(get_file_duration(url=url))
 
- r = arun(to_url([]))
+ # r = arun(to_url([]))
+ text = "这是一个示例文本,包含一个图片:![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ) 这张图片很棒。"
+
+ arun(markdown_base64_to_url(
+ text=text,
+ # pattern=r'!\[.*?\]\((data:image/.*?)\)'
+ # pattern=r'!\[.*?\]\((.*?)\)'
+
+ )
+ )